x86: Avoid deep recursion when destroying a domain and reaping pagetables.
authorKeir Fraser <keir.fraser@citrix.com>
Tue, 13 May 2008 09:16:54 +0000 (10:16 +0100)
committerKeir Fraser <keir.fraser@citrix.com>
Tue, 13 May 2008 09:16:54 +0000 (10:16 +0100)
From: Jan Beulich <jbeulich@novell.com>
Signed-off-by: Keir Fraser <keir.fraser@citrix.com>
xen/arch/x86/domain.c
xen/arch/x86/mm.c
xen/include/asm-x86/config.h

index 5c8fe5f557f72d23b741bf3d6512260067a01db1..6dd3f3331058a417a5900e00c0411b8ffc645176 100644 (file)
@@ -1725,6 +1725,27 @@ static int relinquish_memory(
         if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
             put_page(page);
 
+#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
+        /*
+         * Forcibly drop reference counts of page tables above the topmost (which
+         * were skipped to prevent long latencies due to deep recursion - see
+         * the special treatment in free_lX_table()).
+         */
+        y = page->u.inuse.type_info;
+        if ( (type < PGT_root_page_table) &&
+             unlikely(((y + PGT_type_mask) &
+                       (PGT_type_mask|PGT_validated)) == type) )
+        {
+            BUG_ON((y & PGT_count_mask) >=
+                   (page->count_info & PGC_count_mask));
+            while ( y & PGT_count_mask )
+            {
+                put_page_and_type(page);
+                y = page->u.inuse.type_info;
+            }
+        }
+#endif
+
         /*
          * Forcibly invalidate top-most, still valid page tables at this point
          * to break circular 'linear page table' references. This is okay
@@ -1896,6 +1917,11 @@ int domain_relinquish_resources(struct domain *d)
         /* fallthrough */
 
     case RELMEM_done:
+#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
+        ret = relinquish_memory(d, &d->page_list, PGT_l1_page_table);
+        if ( ret )
+            return ret;
+#endif
         break;
 
     default:
index ef51a4181fe430ae1fd0fd07ad868889729d5749..6839d53b5040206fe5ed46c8e53254873a4fc50a 100644 (file)
@@ -1320,6 +1320,11 @@ static void free_l3_table(struct page_info *page)
     l3_pgentry_t *pl3e;
     int           i;
 
+#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
+    if ( d->arch.relmem == RELMEM_dom_l3 )
+        return;
+#endif
+
     pl3e = map_domain_page(pfn);
 
     for ( i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
@@ -1343,6 +1348,11 @@ static void free_l4_table(struct page_info *page)
     l4_pgentry_t *pl4e = page_to_virt(page);
     int           i;
 
+#ifdef DOMAIN_DESTRUCT_AVOID_RECURSION
+    if ( d->arch.relmem == RELMEM_dom_l4 )
+        return;
+#endif
+
     for ( i = 0; i < L4_PAGETABLE_ENTRIES; i++ )
         if ( is_guest_l4_slot(d, i) )
             put_page_from_l4e(pl4e[i], pfn);
index 006910c29927e58a3c748647bef7f348cf96c49a..87058ee3cd61dcd1a3f92544db4ffbc2cecfb9d4 100644 (file)
 #define CONFIG_HOTPLUG 1
 #define CONFIG_HOTPLUG_CPU 1
 
+/*
+ * Avoid deep recursion when tearing down pagetables during domain destruction,
+ * causing dom0 to become unresponsive and Xen to miss time-critical softirq
+ * deadlines. This will ultimately be replaced by built-in preemptibility of
+ * get_page_type().
+ */
+#define DOMAIN_DESTRUCT_AVOID_RECURSION 1
+
 #define HZ 100
 
 #define OPT_CONSOLE_STR "vga"